3ddb79c3uPGcP_l_2xyGgBSWd5aC-Q xen/include/asm-i386/unaligned.h
3ddb79c2YTaZwOqWin9-QNgHge5RVw xen/include/hypervisor-ifs/block.h
3ddb79c2PMeWTK86y4C3F4MzHw4A1g xen/include/hypervisor-ifs/dom0_ops.h
+3e6377eaioRoNm0m_HSDEAd4Vqrq_w xen/include/hypervisor-ifs/dom_mem_ops.h
3ddb79c25UE59iu4JJcbRalx95mvcg xen/include/hypervisor-ifs/hypervisor-if.h
3ead095dE_VF-QA88rl_5cWYRWtRVQ xen/include/hypervisor-ifs/kbd.h
3ddb79c2oRPrzClk3zbTkRHlpumzKA xen/include/hypervisor-ifs/network.h
3f0c428eIwGr7n9fj4FkBdX2YvA_Rw xen/include/xeno/crc32.h
3ddb79c1V44RD26YqCUm-kqIupM37A xen/include/xeno/ctype.h
3ddb79c05DdHQ0UxX_jKsXdR4QlMCA xen/include/xeno/delay.h
-3e6377eaioRoNm0m_HSDEAd4Vqrq_w xen/include/xeno/dom_mem_ops.h
3ddb79c1uaWQZj551j1O0B5z8AnHOg xen/include/xeno/elevator.h
3ddb79c0HIghfBF8zFUdmXhOU8i6hA xen/include/xeno/errno.h
3ddb79c0rMjudDKkJku_mkm0J-BZgw xen/include/xeno/etherdevice.h
3e5a4e65TNEycLeXqPSXQJQm_xGecA xenolinux-2.4.22-sparse/arch/xeno/defconfig
3e6377f5xwPfYZkPHPrDbEq1PRN7uQ xenolinux-2.4.22-sparse/arch/xeno/drivers/balloon/Makefile
3e6377f8Me8IqtvEhb70XFgOvqQH7A xenolinux-2.4.22-sparse/arch/xeno/drivers/balloon/balloon.c
-3e6377fbMjXWAQd0XN0FWv4fDEo6fg xenolinux-2.4.22-sparse/arch/xeno/drivers/balloon/dom_mem_ops.h
3e5a4e65iHEuC5sjFhj42XALYbLVRw xenolinux-2.4.22-sparse/arch/xeno/drivers/block/Makefile
3f689056Vxx_8K8DQTRysOxx_ikmLg xenolinux-2.4.22-sparse/arch/xeno/drivers/block/info.c
3e5a4e65pP5spJErBW69pJxSSdK9RA xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_block.c
return ret;
}
-static inline int HYPERVISOR_pt_update(page_update_request_t *req, int count)
+static inline int HYPERVISOR_mmu_update(mmu_update_t *req, int count)
{
int ret;
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_pt_update),
+ : "=a" (ret) : "0" (__HYPERVISOR_mmu_update),
"b" (req), "c" (count) );
return ret;
return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
}
-static int send_pgupdates(page_update_request_t *updates, int nr_updates)
+static int send_pgupdates(mmu_update_t *updates, int nr_updates)
{
int ret = -1;
privcmd_hypercall_t hypercall;
- hypercall.op = __HYPERVISOR_pt_update;
+ hypercall.op = __HYPERVISOR_mmu_update;
hypercall.arg[0] = (unsigned long)updates;
hypercall.arg[1] = (unsigned long)nr_updates;
l1_pgentry_t *vl1tab = NULL, *vl1e = NULL;
l2_pgentry_t *vl2tab = NULL, *vl2e = NULL;
unsigned long *page_array = NULL;
- page_update_request_t *pgt_update_arr = NULL, *pgt_updates = NULL;
+ mmu_update_t *pgt_update_arr = NULL, *pgt_updates = NULL;
int alloc_index, num_pt_pages;
unsigned long l2tab;
unsigned long l1tab = 0;
if ( init_pfn_mapper() < 0 )
goto error_out;
- pgt_updates = malloc((tot_pages + 1024) * 3
- * sizeof(page_update_request_t));
+ pgt_updates = malloc((tot_pages + 1024) * 3 * sizeof(mmu_update_t));
page_array = malloc(tot_pages * sizeof(unsigned long));
pgt_update_arr = pgt_updates;
if ( (pgt_update_arr == NULL) || (page_array == NULL) )
* Pin down l2tab addr as page dir page - causes hypervisor to provide
* correct protection for the page
*/
- pgt_updates->ptr = l2tab | PGREQ_EXTENDED_COMMAND;
- pgt_updates->val = PGEXT_PIN_L2_TABLE;
+ pgt_updates->ptr = l2tab | MMU_EXTENDED_COMMAND;
+ pgt_updates->val = MMUEXT_PIN_L2_TABLE;
pgt_updates++;
num_pgt_updates++;
}
pgt_updates->ptr =
- (page_array[count] << PAGE_SHIFT) | PGREQ_MPT_UPDATE;
+ (page_array[count] << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
pgt_updates->val = count;
pgt_updates++;
num_pgt_updates++;
.data
ENTRY(hypervisor_call_table)
.long SYMBOL_NAME(do_set_trap_table)
- .long SYMBOL_NAME(do_process_page_updates)
+ .long SYMBOL_NAME(do_mmu_update)
.long SYMBOL_NAME(do_console_write)
.long SYMBOL_NAME(do_set_gdt)
.long SYMBOL_NAME(do_stack_switch)
*
* Code to handle memory related requests from domains eg. balloon driver.
*
- * Copyright (c) 2003, B Dragovic
+ * Copyright (c) 2003, B Dragovic & K A Fraser.
*/
#include <xeno/config.h>
#include <xeno/types.h>
#include <xeno/lib.h>
#include <xeno/mm.h>
-#include <xeno/dom_mem_ops.h>
+#include <hypervisor-ifs/dom_mem_ops.h>
#include <xeno/perfc.h>
#include <xeno/sched.h>
#include <xeno/event.h>
#include <asm/domain_page.h>
-#if 1
+#if 0
#define DPRINTK(_f, _a...) printk( _f , ## _a )
#else
#define DPRINTK(_f, _a...) ((void)0)
#endif
-static long alloc_dom_mem(struct task_struct *p, balloon_def_op_t bop)
+static long alloc_dom_mem(struct task_struct *p, reservation_increase_t op)
{
struct list_head *temp;
struct pfn_info *pf; /* pfn_info of current page */
/*
* POLICY DECISION: Each domain has a page limit.
- * NB. The first part of test is because bop.size could be so big that
- * tot_pages + bop.size overflows a u_long.
+ * NB. The first part of test is because op.size could be so big that
+ * tot_pages + op.size overflows a u_long.
*/
- if( (bop.size > p->max_pages) ||
- ((p->tot_pages + bop.size) > p->max_pages) )
+ if( (op.size > p->max_pages) ||
+ ((p->tot_pages + op.size) > p->max_pages) )
return -ENOMEM;
spin_lock_irqsave(&free_list_lock, flags);
- if ( free_pfns < (bop.size + (SLACK_DOMAIN_MEM_KILOBYTES >>
+ if ( free_pfns < (op.size + (SLACK_DOMAIN_MEM_KILOBYTES >>
(PAGE_SHIFT-10))) )
{
spin_unlock_irqrestore(&free_list_lock, flags);
spin_lock(&p->page_lock);
temp = free_list.next;
- for ( i = 0; i < bop.size; i++ )
+ for ( i = 0; i < op.size; i++ )
{
/* Get a free page and add it to the domain's page list. */
pf = list_entry(temp, struct pfn_info, list);
/* Inform the domain of the new page's machine address. */
mpfn = (unsigned long)(pf - frame_table);
- copy_to_user(bop.pages, &mpfn, sizeof(mpfn));
- bop.pages++;
+ copy_to_user(op.pages, &mpfn, sizeof(mpfn));
+ op.pages++;
/* Zero out the page to prevent information leakage. */
va = map_domain_mem(mpfn << PAGE_SHIFT);
spin_unlock(&p->page_lock);
spin_unlock_irqrestore(&free_list_lock, flags);
- return bop.size;
+ return op.size;
}
-static long free_dom_mem(struct task_struct *p, balloon_inf_op_t bop)
+static long free_dom_mem(struct task_struct *p, reservation_decrease_t op)
{
struct list_head *temp;
struct pfn_info *pf; /* pfn_info of current page */
spin_lock(&p->page_lock);
temp = free_list.next;
- for ( i = 0; i < bop.size; i++ )
+ for ( i = 0; i < op.size; i++ )
{
- copy_from_user(&mpfn, bop.pages, sizeof(mpfn));
- bop.pages++;
+ copy_from_user(&mpfn, op.pages, sizeof(mpfn));
+ op.pages++;
if ( mpfn >= max_page )
{
DPRINTK("Domain %d page number out of range (%08lx>=%08lx)\n",
perfc_incrc(need_flush_tlb_flush);
}
- return rc ? rc : bop.size;
+ return rc ? rc : op.size;
}
long do_dom_mem_op(dom_mem_op_t *mem_op)
{
dom_mem_op_t dmop;
- unsigned long ret = 0;
+ unsigned long ret;
if ( copy_from_user(&dmop, mem_op, sizeof(dom_mem_op_t)) )
return -EFAULT;
switch ( dmop.op )
{
- case BALLOON_DEFLATE_OP:
- ret = alloc_dom_mem(current, dmop.u.balloon_deflate);
+ case MEMOP_RESERVATION_INCREASE:
+ ret = alloc_dom_mem(current, dmop.u.increase);
break;
- case BALLOON_INFLATE_OP:
- ret = free_dom_mem(current, dmop.u.balloon_inflate);
+ case MEMOP_RESERVATION_DECREASE:
+ ret = free_dom_mem(current, dmop.u.decrease);
break;
default:
- printk("Bad memory operation request %08x.\n", dmop.op);
+ ret = -ENOSYS;
+ break;
}
return ret;
/*
* A description of the page table API:
*
- * Domains trap to process_page_updates with a list of update requests.
+ * Domains trap to do_mmu_update with a list of update requests.
* This is a list of (ptr, val) pairs, where the requested operation
* is *ptr = val.
*
* Pinning the page type:
* ----------------------
* The type of a page can be pinned/unpinned with the commands
- * PGEXT_[UN]PIN_L?_TABLE. Each page can be pinned exactly once (that is,
+ * MMUEXT_[UN]PIN_L?_TABLE. Each page can be pinned exactly once (that is,
* pinning is not reference counted, so it can't be nested).
* This is useful to prevent a page's type count falling to zero, at which
* point safety checks would need to be carried out next time the count
static int do_extended_command(unsigned long ptr, unsigned long val)
{
int err = 0, cpu = smp_processor_id();
- unsigned int cmd = val & PGEXT_CMD_MASK;
+ unsigned int cmd = val & MMUEXT_CMD_MASK;
unsigned long pfn = ptr >> PAGE_SHIFT;
struct pfn_info *page = frame_table + pfn;
/* 'ptr' must be in range except where it isn't a machine address. */
- if ( (pfn >= max_page) && (cmd != PGEXT_SET_LDT) )
+ if ( (pfn >= max_page) && (cmd != MMUEXT_SET_LDT) )
return 1;
switch ( cmd )
{
- case PGEXT_PIN_L1_TABLE:
+ case MMUEXT_PIN_L1_TABLE:
err = get_l1_table(pfn);
goto mark_as_pinned;
- case PGEXT_PIN_L2_TABLE:
+ case MMUEXT_PIN_L2_TABLE:
err = get_l2_table(pfn);
mark_as_pinned:
if ( unlikely(err) )
}
break;
- case PGEXT_UNPIN_TABLE:
+ case MMUEXT_UNPIN_TABLE:
if ( !DOMAIN_OKAY(page->flags) )
{
err = 1;
}
break;
- case PGEXT_NEW_BASEPTR:
+ case MMUEXT_NEW_BASEPTR:
err = get_l2_table(pfn);
if ( !err )
{
}
break;
- case PGEXT_TLB_FLUSH:
+ case MMUEXT_TLB_FLUSH:
deferred_op[cpu].flush_tlb = 1;
break;
- case PGEXT_INVLPG:
- __flush_tlb_one(val & ~PGEXT_CMD_MASK);
+ case MMUEXT_INVLPG:
+ __flush_tlb_one(val & ~MMUEXT_CMD_MASK);
break;
- case PGEXT_SET_LDT:
+ case MMUEXT_SET_LDT:
{
- unsigned long ents = val >> PGEXT_CMD_SHIFT;
+ unsigned long ents = val >> MMUEXT_CMD_SHIFT;
if ( ((ptr & (PAGE_SIZE-1)) != 0) ||
(ents > 8192) ||
((ptr+ents*LDT_ENTRY_SIZE) < ptr) ||
}
default:
- MEM_LOG("Invalid extended pt command 0x%08lx", val & PGEXT_CMD_MASK);
+ MEM_LOG("Invalid extended pt command 0x%08lx", val & MMUEXT_CMD_MASK);
err = 1;
break;
}
}
-int do_process_page_updates(page_update_request_t *ureqs, int count)
+int do_mmu_update(mmu_update_t *ureqs, int count)
{
- page_update_request_t req;
+ mmu_update_t req;
unsigned long flags, pfn, l1e;
struct pfn_info *page;
int err = 0, i, cpu = smp_processor_id();
unsigned int cmd;
unsigned long cr0 = 0;
- perfc_incrc( calls_to_process_page_updates );
+ perfc_incrc( calls_to_mmu_update );
perfc_addc( num_page_updates, count );
for ( i = 0; i < count; i++ )
spin_lock_irq(&current->page_lock);
/* Get the page-frame number that a non-extended command references. */
- if ( (cmd == PGREQ_NORMAL_UPDATE) || (cmd == PGREQ_UNCHECKED_UPDATE) )
+ if ( (cmd == MMU_NORMAL_PT_UPDATE) ||
+ (cmd == MMU_UNCHECKED_PT_UPDATE) )
{
if ( cr0 == 0 )
{
switch ( cmd )
{
/*
- * PGREQ_NORMAL_UPDATE: Normal update to any level of page table.
+ * MMU_NORMAL_PT_UPDATE: Normal update to any level of page table.
*/
- case PGREQ_NORMAL_UPDATE:
+ case MMU_NORMAL_PT_UPDATE:
page = frame_table + pfn;
flags = page->flags;
}
break;
- case PGREQ_UNCHECKED_UPDATE:
+ case MMU_UNCHECKED_PT_UPDATE:
req.ptr &= ~(sizeof(l1_pgentry_t) - 1);
if ( likely(IS_PRIV(current)) )
{
}
break;
- case PGREQ_MPT_UPDATE:
+ case MMU_MACHPHYS_UPDATE:
page = frame_table + pfn;
if ( unlikely(pfn >= max_page) )
{
break;
/*
- * PGREQ_EXTENDED_COMMAND: Extended command is specified
+ * MMU_EXTENDED_COMMAND: Extended command is specified
* in the least-significant bits of the 'value' field.
*/
- case PGREQ_EXTENDED_COMMAND:
+ case MMU_EXTENDED_COMMAND:
req.ptr &= ~(sizeof(l1_pgentry_t) - 1);
err = do_extended_command(req.ptr, req.val);
break;
--- /dev/null
+/******************************************************************************
+ * dom_mem_ops.h
+ *
+ * Guest OS operations dealing with physical memory reservations.
+ *
+ * Copyright (c) 2003, B Dragovic & K A Fraser.
+ */
+
+#define MEMOP_RESERVATION_INCREASE 0
+#define MEMOP_RESERVATION_DECREASE 1
+
+typedef struct reservation_increase {
+ unsigned long size;
+ unsigned long * pages;
+} reservation_increase_t;
+
+typedef struct reservation_decrease {
+ unsigned long size;
+ unsigned long * pages;
+} reservation_decrease_t;
+
+typedef struct dom_mem_op
+{
+ unsigned int op;
+ union
+ {
+ reservation_increase_t increase;
+ reservation_decrease_t decrease;
+ } u;
+} dom_mem_op_t;
/* EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5. */
#define __HYPERVISOR_set_trap_table 0
-#define __HYPERVISOR_pt_update 1
+#define __HYPERVISOR_mmu_update 1
#define __HYPERVISOR_console_write 2
#define __HYPERVISOR_set_gdt 3
#define __HYPERVISOR_stack_switch 4
/*
- * PAGE UPDATE COMMANDS AND FLAGS
- *
- * PGREQ_XXX: specified in least 2 bits of 'ptr' field. These bits are masked
+ * MMU_XXX: specified in least 2 bits of 'ptr' field. These bits are masked
* off to get the real 'ptr' value.
* All requests specify relevant address in 'ptr'. This is either a
- * machine/physical address (PA), or linear/virtual address (VA).
+ * machine/physical address (MA), or linear/virtual address (VA).
* Normal requests specify update value in 'value'.
* Extended requests specify command in least 8 bits of 'value'. These bits
- * are masked off to get the real 'val' value. Except for PGEXT_SET_LDT
+ * are masked off to get the real 'val' value. Except for MMUEXT_SET_LDT
* which shifts the least bits out.
*/
/* A normal page-table update request. */
-#define PGREQ_NORMAL_UPDATE 0 /* checked '*ptr = val'. ptr is VA. */
+#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is VA. */
/* DOM0 can make entirely unchecked updates which do not affect refcnts. */
-#define PGREQ_UNCHECKED_UPDATE 1 /* unchecked '*ptr = val'. ptr is VA. */
+#define MMU_UNCHECKED_PT_UPDATE 1 /* unchecked '*ptr = val'. ptr is VA. */
/* Update an entry in the machine->physical mapping table. */
-#define PGREQ_MPT_UPDATE 2 /* ptr = PA of frame to modify entry for */
+#define MMU_MACHPHYS_UPDATE 2 /* ptr = MA of frame to modify entry for */
/* An extended command. */
-#define PGREQ_EXTENDED_COMMAND 3 /* least 8 bits of val demux further */
+#define MMU_EXTENDED_COMMAND 3 /* least 8 bits of val demux further */
/* Extended commands: */
-#define PGEXT_PIN_L1_TABLE 0 /* ptr = PA of frame to pin */
-#define PGEXT_PIN_L2_TABLE 1 /* ptr = PA of frame to pin */
-#define PGEXT_PIN_L3_TABLE 2 /* ptr = PA of frame to pin */
-#define PGEXT_PIN_L4_TABLE 3 /* ptr = PA of frame to pin */
-#define PGEXT_UNPIN_TABLE 4 /* ptr = PA of frame to unpin */
-#define PGEXT_NEW_BASEPTR 5 /* ptr = PA of new pagetable base */
-#define PGEXT_TLB_FLUSH 6 /* ptr = NULL */
-#define PGEXT_INVLPG 7 /* ptr = NULL ; val = page to invalidate */
-#define PGEXT_SET_LDT 8 /* ptr = VA of table; val = # entries */
-#define PGEXT_CMD_MASK 255
-#define PGEXT_CMD_SHIFT 8
+#define MMUEXT_PIN_L1_TABLE 0 /* ptr = MA of frame to pin */
+#define MMUEXT_PIN_L2_TABLE 1 /* ptr = MA of frame to pin */
+#define MMUEXT_PIN_L3_TABLE 2 /* ptr = MA of frame to pin */
+#define MMUEXT_PIN_L4_TABLE 3 /* ptr = MA of frame to pin */
+#define MMUEXT_UNPIN_TABLE 4 /* ptr = MA of frame to unpin */
+#define MMUEXT_NEW_BASEPTR 5 /* ptr = MA of new pagetable base */
+#define MMUEXT_TLB_FLUSH 6 /* ptr = NULL */
+#define MMUEXT_INVLPG 7 /* ptr = NULL ; val = VA to invalidate */
+#define MMUEXT_SET_LDT 8 /* ptr = VA of table; val = # entries */
+#define MMUEXT_CMD_MASK 255
+#define MMUEXT_CMD_SHIFT 8
/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
#define UVMF_FLUSH_TLB 1 /* Flush entire TLB. */
} trap_info_t;
/*
- * Send an array of these to HYPERVISOR_pt_update()
+ * Send an array of these to HYPERVISOR_mmu_update()
*/
typedef struct
{
unsigned long ptr, val; /* *ptr = val */
-} page_update_request_t;
+} mmu_update_t;
/*
* Send an array of these to HYPERVISOR_multicall()
+++ /dev/null
-/******************************************************************************
- * dom_mem_ops.h
- *
- * Header file supporting domain related memory operations. N.B. keep in sync
- * with xen version.
- *
- * Copyright (c) 2003, B Dragovic
- */
-
-#define BALLOON_DEFLATE_OP 0
-#define BALLOON_INFLATE_OP 1
-
-typedef struct balloon_deflate_op {
- unsigned long size;
- unsigned long * pages;
-} balloon_def_op_t;
-
-typedef struct balloon_inflate_op {
- unsigned long size;
- unsigned long * pages;
-} balloon_inf_op_t;
-
-typedef struct dom_mem_ops
-{
- unsigned int op;
- union
- {
- balloon_def_op_t balloon_deflate;
- balloon_inf_op_t balloon_inflate;
- } u;
-} dom_mem_op_t;
#define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START)
/* Part of the domain API. */
-int do_process_page_updates(page_update_request_t *updates, int count);
+int do_mmu_update(mmu_update_t *updates, int count);
#define DEFAULT_GDT_ENTRIES ((LAST_RESERVED_GDT_ENTRY*8)+7)
#define DEFAULT_GDT_ADDRESS ((unsigned long)gdt_table)
PERFCOUNTER_CPU( domain_page_tlb_flush, "domain page tlb flushes" )
PERFCOUNTER_CPU( need_flush_tlb_flush, "PG_need_flush tlb flushes" )
-PERFCOUNTER_CPU( calls_to_process_page_updates, "calls_to_process_page_updates" )
+PERFCOUNTER_CPU( calls_to_mmu_update, "calls_to_mmu_update" )
PERFCOUNTER_CPU( num_page_updates, "num_page_updates" )
#include <asm/uaccess.h>
#include <asm/tlb.h>
-#include "dom_mem_ops.h"
+#include <asm/hypervisor-ifs/dom_mem_ops.h>
/* USER DEFINES -- THESE SHOULD BE COPIED TO USER-SPACE TOOLS */
#define USER_INFLATE_BALLOON 1 /* return mem to hypervisor */
XENO_flush_page_update_queue();
- dom_mem_op.op = BALLOON_INFLATE_OP;
- dom_mem_op.u.balloon_inflate.size = num_pages;
- dom_mem_op.u.balloon_inflate.pages = parray;
+ dom_mem_op.op = MEMOP_RESERVATION_DECREASE;
+ dom_mem_op.u.decrease.size = num_pages;
+ dom_mem_op.u.decrease.pages = parray;
if ( (ret = HYPERVISOR_dom_mem_op(&dom_mem_op)) != num_pages )
{
printk("Unable to inflate balloon, error %lx\n", ret);
{
phys_to_machine_mapping[i] = *curr;
queue_l1_entry_update(
- (pte_t *)((i << PAGE_SHIFT) | PGREQ_MPT_UPDATE), i);
+ (pte_t *)((i << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE), i);
queue_l1_entry_update(
get_ptep((unsigned long)__va(i << PAGE_SHIFT)),
- ((*curr) << PAGE_SHIFT) | L1_PROT);
+ ((*curr) << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
*curr = (unsigned long)__va(i << PAGE_SHIFT);
curr++;
parray = (unsigned long *)kmalloc(num_pages * sizeof(unsigned long),
GFP_KERNEL);
- dom_mem_op.op = BALLOON_DEFLATE_OP;
- dom_mem_op.u.balloon_deflate.size = num_pages;
- dom_mem_op.u.balloon_deflate.pages = parray;
+ dom_mem_op.op = MEMOP_RESERVATION_INCREASE;
+ dom_mem_op.u.increase.size = num_pages;
+ dom_mem_op.u.increase.pages = parray;
if((ret = HYPERVISOR_dom_mem_op(&dom_mem_op)) != num_pages){
printk("Unable to deflate balloon, error %lx\n", ret);
goto cleanup;
+++ /dev/null
-/******************************************************************************
- * dom_mem_ops.h
- *
- * Header file supporting domain related memory operations. N.B. keep in sync
- * with xen version.
- *
- * Copyright (c) 2003, B Dragovic
- */
-
-#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)
-#define BALLOON_DEFLATE_OP 0
-#define BALLOON_INFLATE_OP 1
-
-typedef struct balloon_deflate_op {
- unsigned long size;
- unsigned long * pages;
-} balloon_def_op_t;
-
-typedef struct balloon_inflate_op {
- unsigned long size;
- unsigned long * pages;
-} balloon_inf_op_t;
-
-typedef struct dom_mem_ops
-{
- unsigned int op;
- union
- {
- balloon_def_op_t balloon_deflate;
- balloon_inf_op_t balloon_inflate;
- }u;
-} dom_mem_op_t;
error_code &= 3;
error_code |= (regs->xcs & 2) << 1;
-#if PT_UPDATE_DEBUG > 0
+#if MMU_UPDATE_DEBUG > 0
if ( (error_code == 0) && (address >= TASK_SIZE) )
{
unsigned long paddr = __pa(address);
int i;
- for ( i = 0; i < pt_update_queue_idx; i++ )
+ for ( i = 0; i < mmu_update_queue_idx; i++ )
{
if ( update_debug_queue[i].ptr == paddr )
{
static spinlock_t update_lock = SPIN_LOCK_UNLOCKED;
#define QUEUE_SIZE 2048
-static page_update_request_t update_queue[QUEUE_SIZE];
-unsigned int pt_update_queue_idx = 0;
-#define idx pt_update_queue_idx
+static mmu_update_t update_queue[QUEUE_SIZE];
+unsigned int mmu_update_queue_idx = 0;
+#define idx mmu_update_queue_idx
-#if PT_UPDATE_DEBUG > 0
+#if MMU_UPDATE_DEBUG > 0
page_update_debug_t update_debug_queue[QUEUE_SIZE] = {{0}};
#undef queue_l1_entry_update
#undef queue_l2_entry_update
static void DEBUG_allow_pt_reads(void)
{
pte_t *pte;
- page_update_request_t update;
+ mmu_update_t update;
int i;
for ( i = idx-1; i >= 0; i-- )
{
update_debug_queue[i].ptep = NULL;
update.ptr = pte;
update.val = update_debug_queue[i].pteval;
- HYPERVISOR_pt_update(&update, 1);
+ HYPERVISOR_mmu_update(&update, 1);
}
}
static void DEBUG_disallow_pt_read(unsigned long va)
* We may fault because of an already outstanding update.
* That's okay -- it'll get fixed up in the fault handler.
*/
- page_update_request_t update;
+ mmu_update_t update;
pgd = pgd_offset_k(va);
pmd = pmd_offset(pgd, va);
pte = pte_offset(pmd, va);
update.ptr = pte;
pteval = *(unsigned long *)pte;
update.val = pteval & ~_PAGE_PRESENT;
- HYPERVISOR_pt_update(&update, 1);
+ HYPERVISOR_mmu_update(&update, 1);
update_debug_queue[idx].ptep = pte;
update_debug_queue[idx].pteval = pteval;
}
#endif
-#if PT_UPDATE_DEBUG > 1
+#if MMU_UPDATE_DEBUG > 1
#undef queue_pt_switch
#undef queue_tlb_flush
#undef queue_invlpg
spin_lock_irqsave(&update_lock, flags);
if ( idx != 0 )
{
-#if PT_UPDATE_DEBUG > 1
+#if MMU_UPDATE_DEBUG > 1
printk("Flushing %d entries from pt update queue\n", idx);
#endif
-#if PT_UPDATE_DEBUG > 0
+#if MMU_UPDATE_DEBUG > 0
DEBUG_allow_pt_reads();
#endif
- queue_multicall2(__HYPERVISOR_pt_update, (unsigned long)update_queue, idx);
+ queue_multicall2(__HYPERVISOR_mmu_update, (unsigned long)update_queue, idx);
idx = 0;
}
spin_unlock_irqrestore(&update_lock, flags);
static inline void __flush_page_update_queue(void)
{
-#if PT_UPDATE_DEBUG > 1
+#if MMU_UPDATE_DEBUG > 1
printk("Flushing %d entries from pt update queue\n", idx);
#endif
-#if PT_UPDATE_DEBUG > 0
+#if MMU_UPDATE_DEBUG > 0
DEBUG_allow_pt_reads();
#endif
- HYPERVISOR_pt_update(update_queue, idx);
+ HYPERVISOR_mmu_update(update_queue, idx);
idx = 0;
}
{
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
-#if PT_UPDATE_DEBUG > 0
+#if MMU_UPDATE_DEBUG > 0
DEBUG_disallow_pt_read((unsigned long)ptr);
#endif
update_queue[idx].ptr = (unsigned long)ptr;
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
- update_queue[idx].val = PGEXT_NEW_BASEPTR;
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_NEW_BASEPTR;
increment_index();
spin_unlock_irqrestore(&update_lock, flags);
}
{
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = PGREQ_EXTENDED_COMMAND;
- update_queue[idx].val = PGEXT_TLB_FLUSH;
+ update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_TLB_FLUSH;
increment_index();
spin_unlock_irqrestore(&update_lock, flags);
}
{
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = PGREQ_EXTENDED_COMMAND;
+ update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
update_queue[idx].val = ptr & PAGE_MASK;
- update_queue[idx].val |= PGEXT_INVLPG;
+ update_queue[idx].val |= MMUEXT_INVLPG;
increment_index();
spin_unlock_irqrestore(&update_lock, flags);
}
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
- update_queue[idx].val = PGEXT_PIN_L2_TABLE;
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_PIN_L2_TABLE;
increment_index();
spin_unlock_irqrestore(&update_lock, flags);
}
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
- update_queue[idx].val = PGEXT_UNPIN_TABLE;
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_UNPIN_TABLE;
increment_index();
spin_unlock_irqrestore(&update_lock, flags);
}
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
- update_queue[idx].val = PGEXT_PIN_L1_TABLE;
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_PIN_L1_TABLE;
increment_index();
spin_unlock_irqrestore(&update_lock, flags);
}
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
- update_queue[idx].val = PGEXT_UNPIN_TABLE;
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_UNPIN_TABLE;
increment_index();
spin_unlock_irqrestore(&update_lock, flags);
}
{
unsigned long flags;
spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = PGREQ_EXTENDED_COMMAND | ptr;
- update_queue[idx].val = PGEXT_SET_LDT | (len << PGEXT_CMD_SHIFT);
+ update_queue[idx].ptr = MMU_EXTENDED_COMMAND | ptr;
+ update_queue[idx].val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
increment_index();
spin_unlock_irqrestore(&update_lock, flags);
}
#if defined(CONFIG_XENO_PRIV)
-#define direct_set_pte(_p, _v) queue_unchecked_pt_update((_p), (_v).pte_low)
+#define direct_set_pte(_p, _v) queue_unchecked_mmu_update((_p), (_v).pte_low)
#define __direct_pte(x) ((pte_t) { (x) } )
#define __direct_mk_pte(page_nr,pgprot) \
__direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
* be MACHINE addresses.
*/
-extern unsigned int pt_update_queue_idx;
+extern unsigned int mmu_update_queue_idx;
void queue_l1_entry_update(pte_t *ptr, unsigned long val);
void queue_l2_entry_update(pmd_t *ptr, unsigned long val);
void queue_pte_pin(unsigned long ptr);
void queue_pte_unpin(unsigned long ptr);
void queue_set_ldt(unsigned long ptr, unsigned long bytes);
-#define PT_UPDATE_DEBUG 0
+#define MMU_UPDATE_DEBUG 0
-#define queue_unchecked_pt_update(_p,_v) queue_l1_entry_update( \
- (pte_t *)((unsigned long)(_p)|PGREQ_UNCHECKED_UPDATE),(_v))
+#define queue_unchecked_mmu_update(_p,_v) queue_l1_entry_update( \
+ (pte_t *)((unsigned long)(_p)|MMU_UNCHECKED_PT_UPDATE),(_v))
-#if PT_UPDATE_DEBUG > 0
+#if MMU_UPDATE_DEBUG > 0
typedef struct {
void *ptr;
unsigned long val, pteval;
} page_update_debug_t;
extern page_update_debug_t update_debug_queue[];
#define queue_l1_entry_update(_p,_v) ({ \
- update_debug_queue[pt_update_queue_idx].ptr = (_p); \
- update_debug_queue[pt_update_queue_idx].val = (_v); \
- update_debug_queue[pt_update_queue_idx].line = __LINE__; \
- update_debug_queue[pt_update_queue_idx].file = __FILE__; \
+ update_debug_queue[mmu_update_queue_idx].ptr = (_p); \
+ update_debug_queue[mmu_update_queue_idx].val = (_v); \
+ update_debug_queue[mmu_update_queue_idx].line = __LINE__; \
+ update_debug_queue[mmu_update_queue_idx].file = __FILE__; \
queue_l1_entry_update((_p),(_v)); \
})
#define queue_l2_entry_update(_p,_v) ({ \
- update_debug_queue[pt_update_queue_idx].ptr = (_p); \
- update_debug_queue[pt_update_queue_idx].val = (_v); \
- update_debug_queue[pt_update_queue_idx].line = __LINE__; \
- update_debug_queue[pt_update_queue_idx].file = __FILE__; \
+ update_debug_queue[mmu_update_queue_idx].ptr = (_p); \
+ update_debug_queue[mmu_update_queue_idx].val = (_v); \
+ update_debug_queue[mmu_update_queue_idx].line = __LINE__; \
+ update_debug_queue[mmu_update_queue_idx].file = __FILE__; \
queue_l2_entry_update((_p),(_v)); \
})
#endif
-#if PT_UPDATE_DEBUG > 1
+#if MMU_UPDATE_DEBUG > 1
#undef queue_l1_entry_update
#undef queue_l2_entry_update
#define queue_l1_entry_update(_p,_v) ({ \
- update_debug_queue[pt_update_queue_idx].ptr = (_p); \
- update_debug_queue[pt_update_queue_idx].val = (_v); \
- update_debug_queue[pt_update_queue_idx].line = __LINE__; \
- update_debug_queue[pt_update_queue_idx].file = __FILE__; \
+ update_debug_queue[mmu_update_queue_idx].ptr = (_p); \
+ update_debug_queue[mmu_update_queue_idx].val = (_v); \
+ update_debug_queue[mmu_update_queue_idx].line = __LINE__; \
+ update_debug_queue[mmu_update_queue_idx].file = __FILE__; \
printk("L1 %s %d: %08lx (%08lx -> %08lx)\n", __FILE__, __LINE__, \
(_p), pte_val(_p), \
(unsigned long)(_v)); \
queue_l1_entry_update((_p),(_v)); \
})
#define queue_l2_entry_update(_p,_v) ({ \
- update_debug_queue[pt_update_queue_idx].ptr = (_p); \
- update_debug_queue[pt_update_queue_idx].val = (_v); \
- update_debug_queue[pt_update_queue_idx].line = __LINE__; \
- update_debug_queue[pt_update_queue_idx].file = __FILE__; \
+ update_debug_queue[mmu_update_queue_idx].ptr = (_p); \
+ update_debug_queue[mmu_update_queue_idx].val = (_v); \
+ update_debug_queue[mmu_update_queue_idx].line = __LINE__; \
+ update_debug_queue[mmu_update_queue_idx].file = __FILE__; \
printk("L2 %s %d: %08lx (%08lx -> %08lx)\n", __FILE__, __LINE__, \
(_p), pmd_val(_p), \
(unsigned long)(_v)); \
void _flush_page_update_queue(void);
static inline int flush_page_update_queue(void)
{
- unsigned int idx = pt_update_queue_idx;
+ unsigned int idx = mmu_update_queue_idx;
if ( idx != 0 ) _flush_page_update_queue();
return idx;
}
}
-static inline int HYPERVISOR_pt_update(page_update_request_t *req, int count)
+static inline int HYPERVISOR_mmu_update(mmu_update_t *req, int count)
{
int ret;
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_pt_update),
+ : "=a" (ret) : "0" (__HYPERVISOR_mmu_update),
"b" (req), "c" (count) );
return ret;
struct page *page = pte_page(pte);
#if defined(CONFIG_XENO_PRIV)
if (pte_io(pte)) {
- queue_unchecked_pt_update(ptep, 0);
+ queue_unchecked_mmu_update(ptep, 0);
continue;
}
#endif